[1]:
import random
import pandas as pd
[3]:
# Build 50,000 synthetic samples: three independent Uniform(0, 1) features
# and a target defined by the exact linear rule 2*x + 3*y + z.
d1 = []
d2 = []
d3 = []
d4 = []
for _ in range(50000):
    a = random.random()
    b = random.random()
    c = random.random()
    d1.append(a)
    d2.append(b)
    d3.append(c)
    d4.append(2 * a + 3 * b + c)
[5]:
df=pd.DataFrame({'x1':d1,'x2':d2,'x3':d3,'x4':d4},index=range(0,50000))
[7]:
# Select the three feature columns and the target column by name
# (equivalent to positional iloc slicing for this frame layout).
X = df.loc[:, ['x1', 'x2', 'x3']]
Y = df.loc[:, 'x4']
[9]:
import tensorflow as tf
from sklearn.model_selection import train_test_split
[11]:
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
[15]:
# Regression MLP: 3 inputs -> 9 -> 6 -> 1 linear output.
# An explicit Input layer replaces the deprecated input_dim argument,
# which triggered the Keras UserWarning visible in the cell output.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='linear')
])
# 'accuracy' is meaningless for a continuous target — the training log
# shows it pinned at 0.0 every epoch. Track mean absolute error instead.
model.compile(optimizer='adam',
              loss='mse',
              metrics=['mae'])
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
[17]:
model.fit(X_train, y_train, epochs=20)
Epoch 1/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 6s 2ms/step - accuracy: 0.0000e+00 - loss: 2.3078 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 0.0040 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 7.7983e-04 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 8.5066e-05 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 3.6086e-05 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.0000e+00 - loss: 2.0163e-05 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0294e-05 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 4.7881e-06 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.4368e-06 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.6454e-06 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.5711e-06 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 5s 2ms/step - accuracy: 0.0000e+00 - loss: 2.0516e-06 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.1975e-06 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0248e-06 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.1412e-06 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 2.1384e-07 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.4165e-06 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.2556e-06 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.6730e-06 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.0000e+00 - loss: 1.0331e-06
[17]:
<keras.src.callbacks.history.History at 0x1cdf1b05ac0>
[19]:
y_pred=model.predict(X_test)
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
[21]:
from sklearn.metrics import r2_score
[23]:
r2_score(y_pred, y_test)
[23]:
0.999999996079316
[25]:
import matplotlib.pyplot as plt
%matplotlib inline
[18]:
# Plot the per-epoch training metrics recorded by the most recent
# model.fit call (model.history.history maps metric name -> list of
# per-epoch values). NOTE(review): this requires fit() to have run in
# this kernel session; it fails on a fresh kernel without re-training.
metrics_df = pd.DataFrame(model.history.history)
metrics_df.plot()
[18]:
<matplotlib.axes._subplots.AxesSubplot at 0x3cd7882308>
[1]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Generate synthetic data: three Uniform(0, 1) features and a target that
# is an exact linear combination, x4 = 2*x + 3*y + z.
d1 = []
d2 = []
d3 = []
d4 = []
for i in range(50000):
    x = random.random()
    d1.append(x)
    y = random.random()
    d2.append(y)
    z = random.random()
    d3.append(z)
    d4.append(2 * x + 3 * y + z)  # Target variable

df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4}, index=range(0, 50000))

# Features and target
X = df.iloc[:, 0:3]  # Features (x1, x2, x3)
Y = df.iloc[:, 3]    # Target (x4)

# Train-test split; seeded so the split is reproducible on re-run
# (the original was unseeded, unlike the later cells in this notebook).
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Define the model. An explicit Input layer replaces the deprecated
# input_dim argument that produced the Keras UserWarning in the output.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='linear')
])

# Compile the model. 'accuracy' is meaningless for regression (it logged
# 0.0 every epoch); mean absolute error is an interpretable substitute.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Train the model
model.fit(X_train, y_train, epochs=20)

# Make predictions
y_pred = model.predict(X_test)

# Evaluate performance using R² score (y_true first, y_pred second)
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 691us/step - accuracy: 0.0000e+00 - loss: 2.4579 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 2.2003e-04 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 699us/step - accuracy: 0.0000e+00 - loss: 7.5260e-05 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 4.3746e-05 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 683us/step - accuracy: 0.0000e+00 - loss: 1.7932e-05 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 8.9144e-06 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 3.7749e-06 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 1.6104e-06 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 661us/step - accuracy: 0.0000e+00 - loss: 9.2002e-07 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 678us/step - accuracy: 0.0000e+00 - loss: 8.5859e-07 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 719us/step - accuracy: 0.0000e+00 - loss: 1.8239e-06 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 670us/step - accuracy: 0.0000e+00 - loss: 1.0040e-06 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 1.8101e-06 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 819us/step - accuracy: 0.0000e+00 - loss: 1.8927e-07 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 748us/step - accuracy: 0.0000e+00 - loss: 5.4305e-07 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 728us/step - accuracy: 0.0000e+00 - loss: 1.2576e-06 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.0757e-06 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 790us/step - accuracy: 0.0000e+00 - loss: 1.0126e-06 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 750us/step - accuracy: 0.0000e+00 - loss: 6.0779e-07 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 723us/step - accuracy: 0.0000e+00 - loss: 1.5399e-06 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step 
R² score: 0.9999993176162081
[5]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Step 1: Generate synthetic data — three Uniform(0, 1) features and a
# target defined by the exact linear rule x4 = 2*x + 3*y + z.
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
    x = random.random()
    d1.append(x)
    y = random.random()
    d2.append(y)
    z = random.random()
    d3.append(z)
    d4.append(2 * x + 3 * y + z)  # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3]  # x1, x2, x3
Y = df.iloc[:, 3]    # x4

# Step 3: Train-test split (seeded for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas objects to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model. The explicit Input layer replaces the
# deprecated input_dim argument (source of the logged UserWarning).
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),   # Hidden layer 1
    tf.keras.layers.Dense(6, activation='relu'),   # Hidden layer 2
    tf.keras.layers.Dense(1, activation='linear')  # Output layer for regression
])

# Step 5: Compile. 'accuracy' is meaningless for continuous targets
# (it was 0.0 every epoch in the log); track MAE instead.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score (y_true first, y_pred second)
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 714us/step - accuracy: 0.0000e+00 - loss: 3.0730 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 2.4699e-04 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 667us/step - accuracy: 0.0000e+00 - loss: 5.9977e-05 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 656us/step - accuracy: 0.0000e+00 - loss: 3.8079e-05 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 660us/step - accuracy: 0.0000e+00 - loss: 2.6635e-05 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 702us/step - accuracy: 0.0000e+00 - loss: 1.0837e-05 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 7.4368e-06 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 689us/step - accuracy: 0.0000e+00 - loss: 3.5923e-06 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 1.8856e-06 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 670us/step - accuracy: 0.0000e+00 - loss: 1.6077e-06 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 702us/step - accuracy: 0.0000e+00 - loss: 1.6218e-06 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 675us/step - accuracy: 0.0000e+00 - loss: 1.4404e-06 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 6.6282e-07 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.2724e-06 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 717us/step - accuracy: 0.0000e+00 - loss: 1.8796e-06 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 713us/step - accuracy: 0.0000e+00 - loss: 1.8872e-06 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 2.1026e-06 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 5.8587e-07 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 688us/step - accuracy: 0.0000e+00 - loss: 1.4093e-06 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 709us/step - accuracy: 0.0000e+00 - loss: 1.7018e-06 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step 
R² score: 0.9999999454713913
[9]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Step 1: Generate synthetic data — three Uniform(0, 1) features and a
# target defined by the exact linear rule x4 = 2*x + 3*y + z.
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
    x = random.random()
    d1.append(x)
    y = random.random()
    d2.append(y)
    z = random.random()
    d3.append(z)
    d4.append(2 * x + 3 * y + z)  # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Print some sample x, y, z values
print("Sample data:")
print(df.head())  # Display first 5 rows of the data

# Save the data to a CSV file for further inspection if needed
df.to_csv('generated_data.csv', index=False)
print("Data saved to 'generated_data.csv'.")

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3]  # x1, x2, x3
Y = df.iloc[:, 3]    # x4

# Step 3: Train-test split (seeded for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas objects to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model. The explicit Input layer replaces the
# deprecated input_dim argument (source of the logged UserWarning).
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),   # Hidden layer 1
    tf.keras.layers.Dense(6, activation='relu'),   # Hidden layer 2
    tf.keras.layers.Dense(1, activation='linear')  # Output layer for regression
])

# Step 5: Compile. 'accuracy' is meaningless for continuous targets
# (it was 0.0 every epoch in the log); track MAE instead.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score (y_true first, y_pred second)
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')
Sample data:
x1 x2 x3 x4
0 0.269617 0.080424 0.718373 1.498878
1 0.563495 0.728264 0.032121 3.343904
2 0.191909 0.747924 0.703519 3.331108
3 0.094050 0.521800 0.547263 2.300764
4 0.864998 0.839702 0.178130 4.427231
Data saved to 'generated_data.csv'.
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - accuracy: 0.0000e+00 - loss: 3.1564 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 734us/step - accuracy: 0.0000e+00 - loss: 2.1667e-04 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 683us/step - accuracy: 0.0000e+00 - loss: 6.2466e-05 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 671us/step - accuracy: 0.0000e+00 - loss: 2.1148e-05 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 681us/step - accuracy: 0.0000e+00 - loss: 9.4850e-06 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 701us/step - accuracy: 0.0000e+00 - loss: 3.1111e-06 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 680us/step - accuracy: 0.0000e+00 - loss: 2.2540e-06 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 699us/step - accuracy: 0.0000e+00 - loss: 1.1908e-06 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 675us/step - accuracy: 0.0000e+00 - loss: 1.1005e-06 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 2.2073e-06 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 667us/step - accuracy: 0.0000e+00 - loss: 1.0672e-06 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 738us/step - accuracy: 0.0000e+00 - loss: 2.2186e-06 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 760us/step - accuracy: 0.0000e+00 - loss: 1.4514e-07 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 768us/step - accuracy: 0.0000e+00 - loss: 1.6680e-06 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 764us/step - accuracy: 0.0000e+00 - loss: 2.1007e-06 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 786us/step - accuracy: 0.0000e+00 - loss: 1.3550e-06 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 833us/step - accuracy: 0.0000e+00 - loss: 7.3741e-07 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 835us/step - accuracy: 0.0000e+00 - loss: 2.2035e-06 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 800us/step - accuracy: 0.0000e+00 - loss: 2.4538e-06 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 839us/step - accuracy: 0.0000e+00 - loss: 2.4899e-06 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 788us/step R² 
score: 0.9999998611216304
[11]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt

# Step 1: Generate synthetic data — three Uniform(0, 1) features and a
# target defined by the exact linear rule x4 = 2*x + 3*y + z.
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
    x = random.random()
    d1.append(x)
    y = random.random()
    d2.append(y)
    z = random.random()
    d3.append(z)
    d4.append(2 * x + 3 * y + z)  # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3]  # x1, x2, x3
Y = df.iloc[:, 3]    # x4

# Step 3: Train-test split (seeded for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas objects to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model. The explicit Input layer replaces the
# deprecated input_dim argument (source of the logged UserWarning).
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),   # Hidden layer 1
    tf.keras.layers.Dense(6, activation='relu'),   # Hidden layer 2
    tf.keras.layers.Dense(1, activation='linear')  # Output layer for regression
])

# Step 5: Compile. 'accuracy' is meaningless for continuous targets
# (it was 0.0 every epoch in the log); track MAE instead.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score (y_true first, y_pred second)
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

# Step 9: Plot graphs
# Scatter plot of actual vs predicted values. The ideal-fit line is drawn
# with explicit kwargs: the original passed both the 'k--' fmt string and
# color='red', which conflict (matplotlib warned and used red). Explicit
# color/linestyle keeps the rendered red dashed line without the warning.
plt.figure(figsize=(10, 6))
plt.scatter(y_test, y_pred, alpha=0.5, color='blue')
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()],
         color='red', linestyle='--', lw=2)
plt.title('Actual vs Predicted Values')
plt.xlabel('Actual Values (y_test)')
plt.ylabel('Predicted Values (y_pred)')
plt.grid(True)
plt.show()

# 3D scatter plot of features x1, x2, x3 colored by the target x4
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df['x1'], df['x2'], df['x3'], c=df['x4'], cmap='viridis', alpha=0.6)
ax.set_title('3D Scatter Plot of Features (x1, x2, x3) and Target (x4)')
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('x3')
plt.show()
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 699us/step - accuracy: 0.0000e+00 - loss: 6.7616 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 0.0350 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 9.0078e-05 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 666us/step - accuracy: 0.0000e+00 - loss: 2.2502e-05 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 1.0236e-05 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 694us/step - accuracy: 0.0000e+00 - loss: 3.6448e-06 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 694us/step - accuracy: 0.0000e+00 - loss: 1.7947e-06 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 671us/step - accuracy: 0.0000e+00 - loss: 9.5031e-07 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 5.8944e-07 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 677us/step - accuracy: 0.0000e+00 - loss: 4.8120e-07 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 673us/step - accuracy: 0.0000e+00 - loss: 8.9556e-07 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 7.9028e-07 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 679us/step - accuracy: 0.0000e+00 - loss: 6.2363e-07 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 672us/step - accuracy: 0.0000e+00 - loss: 8.8967e-07 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 695us/step - accuracy: 0.0000e+00 - loss: 9.8036e-07 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 674us/step - accuracy: 0.0000e+00 - loss: 9.4307e-07 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 692us/step - accuracy: 0.0000e+00 - loss: 3.2975e-07 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 3.3105e-07 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 723us/step - accuracy: 0.0000e+00 - loss: 4.3320e-07 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 695us/step - accuracy: 0.0000e+00 - loss: 7.4015e-07 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step R² 
score: 0.9999998184931179
C:\Users\nr802\AppData\Local\Temp\ipykernel_5420\4223667725.py:59: UserWarning: color is redundantly defined by the 'color' keyword argument and the fmt string "k--" (-> color='k'). The keyword argument will take precedence. plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2, color='red')
[13]:
import random
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import plotly.express as px
import plotly.graph_objects as go

# Step 1: Generate synthetic data — three Uniform(0, 1) features and a
# target defined by the exact linear rule x4 = 2*x + 3*y + z.
d1, d2, d3, d4 = [], [], [], []
for i in range(50000):
    x = random.random()
    d1.append(x)
    y = random.random()
    d2.append(y)
    z = random.random()
    d3.append(z)
    d4.append(2 * x + 3 * y + z)  # Target variable based on the formula

# Create a DataFrame
df = pd.DataFrame({'x1': d1, 'x2': d2, 'x3': d3, 'x4': d4})

# Step 2: Define features (X) and target (Y)
X = df.iloc[:, 0:3]  # x1, x2, x3
Y = df.iloc[:, 3]    # x4

# Step 3: Train-test split (seeded for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# Convert Pandas objects to NumPy arrays for TensorFlow compatibility
X_train = X_train.values
X_test = X_test.values
y_train = y_train.values
y_test = y_test.values

# Step 4: Define the model. The explicit Input layer replaces the
# deprecated input_dim argument (source of the logged UserWarning).
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(9, activation='relu'),   # Hidden layer 1
    tf.keras.layers.Dense(6, activation='relu'),   # Hidden layer 2
    tf.keras.layers.Dense(1, activation='linear')  # Output layer for regression
])

# Step 5: Compile. 'accuracy' is meaningless for continuous targets
# (it was 0.0 every epoch in the log); track MAE instead.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Step 6: Train the model
model.fit(X_train, y_train, epochs=20, verbose=1)

# Step 7: Make predictions on the test set
y_pred = model.predict(X_test)

# Step 8: Evaluate the model using R² score (y_true first, y_pred second)
r2 = r2_score(y_test, y_pred)
print(f'R² score: {r2}')

# Step 9: Plotly graphs
# 1. Scatter plot: actual vs predicted values, with the ideal-fit diagonal
scatter_fig = px.scatter(
    x=y_test, y=y_pred.flatten(),
    labels={'x': 'Actual Values (y_test)', 'y': 'Predicted Values (y_pred)'},
    title='Actual vs Predicted Values'
)
scatter_fig.add_trace(go.Scatter(
    x=[y_test.min(), y_test.max()],
    y=[y_test.min(), y_test.max()],
    mode='lines',
    line=dict(color='red', dash='dash'),
    name='Ideal Fit Line'
))
scatter_fig.show()

# 2. 3D scatter plot: features (x1, x2, x3) colored by target (x4)
scatter_3d_fig = px.scatter_3d(
    df, x='x1', y='x2', z='x3', color='x4',
    title='3D Scatter Plot of Features and Target',
    labels={'x1': 'Feature x1', 'x2': 'Feature x2', 'x3': 'Feature x3', 'x4': 'Target x4'}
)
scatter_3d_fig.update_traces(marker=dict(size=3, opacity=0.7))
scatter_3d_fig.show()
Epoch 1/20
C:\Users\nr802\anaconda3\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 2s 728us/step - accuracy: 0.0000e+00 - loss: 2.5704 Epoch 2/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 1.1223e-04 Epoch 3/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 678us/step - accuracy: 0.0000e+00 - loss: 1.2298e-05 Epoch 4/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 676us/step - accuracy: 0.0000e+00 - loss: 3.5542e-06 Epoch 5/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 673us/step - accuracy: 0.0000e+00 - loss: 1.4461e-06 Epoch 6/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 697us/step - accuracy: 0.0000e+00 - loss: 1.0300e-06 Epoch 7/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 684us/step - accuracy: 0.0000e+00 - loss: 1.0092e-06 Epoch 8/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 727us/step - accuracy: 0.0000e+00 - loss: 1.2825e-06 Epoch 9/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 1.4362e-06 Epoch 10/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 686us/step - accuracy: 0.0000e+00 - loss: 1.5621e-06 Epoch 11/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 674us/step - accuracy: 0.0000e+00 - loss: 2.6883e-06 Epoch 12/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 700us/step - accuracy: 0.0000e+00 - loss: 1.6967e-06 Epoch 13/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 682us/step - accuracy: 0.0000e+00 - loss: 1.8244e-06 Epoch 14/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 691us/step - accuracy: 0.0000e+00 - loss: 3.3055e-06 Epoch 15/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 3.7872e-06 Epoch 16/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 687us/step - accuracy: 0.0000e+00 - loss: 2.6825e-06 Epoch 17/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 713us/step - accuracy: 0.0000e+00 - loss: 1.5076e-06 Epoch 18/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 701us/step - accuracy: 0.0000e+00 - loss: 2.6970e-06 Epoch 19/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 711us/step - accuracy: 0.0000e+00 - loss: 1.9525e-06 Epoch 20/20 1250/1250 ━━━━━━━━━━━━━━━━━━━━ 1s 696us/step - accuracy: 0.0000e+00 - loss: 2.2977e-06 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 654us/step 
R² score: 0.999993681133065
[ ]:
